code stringlengths 101 5.91M |
|---|
_bpe('fastbpe', dataclass=fastBPEConfig)
class fastBPE(object):
def __init__(self, cfg):
if (cfg.bpe_codes is None):
raise ValueError('--bpe-codes is required for --bpe=fastbpe')
codes = file_utils.cached_path(cfg.bpe_codes)
try:
import fastBPE
self.bpe = ... |
class ConnectionState():
def __init__(self):
self.sequence_number = (- 1)
self.initialized = False
self.connected = True
def update_sequence(self, request):
if (request.sequence_number <= self.sequence_number):
return
self.sequence_number = request.sequence_nu... |
def test_graphql(graphql_url):
schema = gql_loaders.from_url(graphql_url)
(initialized, *other, finished) = list(from_schema(schema, hypothesis_settings=hypothesis.settings(max_examples=5, deadline=None)).execute())
assert (initialized.operations_count == 4)
assert (finished.passed_count == 4)
for (... |
def egg_info_for_url(url):
parts = urllib.parse.urlparse(url)
(scheme, server, path, parameters, query, fragment) = parts
base = urllib.parse.unquote(path.split('/')[(- 1)])
if ((server == 'sourceforge.net') and (base == 'download')):
base = urllib.parse.unquote(path.split('/')[(- 2)])
if ('... |
class Mention():
    # One entity mention extracted from text, together with its linking
    # candidates. NOTE(review): the fields are bare annotations with defaults —
    # presumably this class is decorated as a @dataclass upstream and the
    # decorator was lost in extraction; confirm before instantiating directly.
    text: str                            # surface form of the mention
    title: str                           # title of the source document (presumably; confirm)
    index: int                           # ordinal/position of the mention (unit unclear from here)
    candidates: List[Candidate]          # candidate entities to link this mention to
    start: Optional[int] = None          # start offset, if known (char vs token unclear — confirm)
    end: Optional[int] = None            # end offset, if known
def is_devicelevel_fpga(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType) -> bool:
    """Return True when ``node`` runs at FPGA device level.

    A node qualifies if it lies inside an ``FPGA_Device``-scheduled scope, or
    if the enclosing state (when given) is itself an FPGA kernel.
    """
    from dace.sdfg.utils import is_fpga_kernel
    # Check the cheap scope test first; only consult the kernel test if needed
    # (preserves the original short-circuit evaluation order).
    if is_in_scope(sdfg, state, node, [dtypes.ScheduleType.FPGA_Device]):
        return True
    return state is not None and is_fpga_kernel(sdfg, state)
class FastTextEmbeddings(NeuralEmbeddings):
def __init__(self, model: str='cc.en.300.bin', force_download: bool=True, dir: str=None) -> None:
self.model = model
self.dir = dir
self.force_download = force_download
if (self.dir is None):
self.dir = f'{torch.hub.get_dir()}/{... |
def to(partition, *args, **kwargs):
device = None
if ('device' in kwargs):
device = kwargs['device']
elif ('tensor' in kwargs):
device = kwargs['tensor'].device
if args:
if isinstance(args[0], (torch.device, int, str)):
device = args[0]
if torch.is_tensor(args... |
(help='')
('--log-dir', type=str, help='logging directory')
('--dataset', default='coco', type=str)
('--dataset_dir', default='', type=str)
('--im-size', default=256, type=int, help='dataset resize size')
('--crop-size', default=256, type=int)
('--window-size', default=256, type=int)
('--window-stride', default=None, t... |
def test_simple_output(simple_confusion):
    """Formatting the simple confusion fixture yields the expected text."""
    actual = format_confusion(simple_confusion)
    assert actual == EXPECTED_SIMPLE_OUTPUT
def mean(*seqs: Sequence[Numeric]) -> Union[Numeric, Sequence[Numeric]]:
    """Compute the arithmetic mean of each given sequence.

    With exactly one sequence, return its mean as a float; with several,
    return a list of floats, one per sequence, in order.
    """
    means = []
    for seq in seqs:
        means.append(float(np.mean(seq)))
    # Unwrap the singleton case so a single-sequence call gets a scalar.
    if len(seqs) == 1:
        return means[0]
    return means
class CrystalOfNakajimaMonomials(InfinityCrystalOfNakajimaMonomials):
def __classcall_private__(cls, cartan_type, La=None, c=None):
if (La is None):
La = cartan_type
cartan_type = La.parent().cartan_type()
cartan_type = CartanType(cartan_type)
if cartan_type.is_affine... |
def split_on_phrase_rgx(sentences, doc, rgx, threshold=250):
splits = []
for sent in sentences:
matches = re.findall(rgx, sent.text)
if ((len(sent.text) >= threshold) and matches):
offset = sent[0].idx
m_idxs = set()
for m in matches:
m_idxs.ad... |
_module()
class ImgInpaintingDataset(BaseDataset):
def __init__(self, ann_file, pipeline, data_prefix=None, test_mode=False):
super().__init__(pipeline, test_mode)
self.ann_file = str(ann_file)
self.data_prefix = str(data_prefix)
self.data_infos = self.load_annotations()
def load... |
_REGISTRY.register()
class Imagenet(torch.utils.data.Dataset):
def __init__(self, cfg, mode, num_retries=10):
self.num_retries = num_retries
self.cfg = cfg
self.mode = mode
self.data_path = cfg.DATA.PATH_TO_DATA_DIR
assert (mode in ['train', 'val', 'test']), "Split '{}' not s... |
def ctest_args(args_list):
parser = argparse.ArgumentParser(description='Compare two npz tensor files.')
parser.add_argument('npz_file', help='Reference file with fp32 data')
parser.add_argument('--calibration_table', type=str, required=True, help='calibration table of npz file')
args = parser.parse_arg... |
def download(out_dir, category, set_name, tag):
url = '
if (set_name == 'test'):
out_name = 'test_lmdb.zip'
else:
out_name = '{category}_{set_name}_lmdb.zip'.format(**locals())
out_path = os.path.join(out_dir, out_name)
print(url, out_path)
cmd = ['curl', url, '-o', out_path]
... |
_level_function(module='ak.str')
def replace_substring_regex(array, pattern, replacement, *, max_replacements=None, highlevel=True, behavior=None, attrs=None):
    # Dispatch hook: yielding the array argument hands it to the registered
    # high-level-function dispatcher before the real work runs (this
    # generator-yield-then-return shape is the dispatch idiom used by these
    # wrappers; presumably the decorator consumes the yielded tuple — confirm).
    (yield (array,))
    # Delegate the actual regex substring replacement to the private impl.
    return _impl(array, pattern, replacement, max_replacements, highlevel, behavior, attrs)
def tokens_to_PartStaff(tokens, key_=0, start_voice=1):
tokens = concatenated_to_regular(tokens)
p = stream.PartStaff()
k = key.KeySignature(key_)
voice_id = start_voice
voice_flag = False
after_voice = False
voice_start = None
ottava_flag = False
ottava_elements = []
tokens = ag... |
def load_backward(state):
new_state = collections.OrderedDict()
for (key, val) in state.items():
multi = False
if key.startswith('module.'):
multi = True
key = key[len('module.'):]
if (key == 'true_help'):
continue
if key.startswith('bert_q.'):... |
class FileHandler(StreamHandler):
def __init__(self, filename, mode='a', encoding=None, delay=False):
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
Handler.__init__(self)
self.strea... |
class ConvertToPyTorchModel(nn.Module):
def __init__(self, base_model, classify_fn_args, classify=None, normalization=None, class_sublist=None, adversarial_attack=None):
super().__init__()
if (normalization is not None):
self.input_space = normalization.input_space
self.mean ... |
def make_embeddings(opt, word_dict, for_encoder=True):
embedding_dim = opt.word_vec_size
word_padding_idx = word_dict.to_ind(markers.PAD)
num_word_embeddings = len(word_dict)
return Embeddings(word_vec_size=embedding_dim, position_encoding=False, dropout=opt.dropout, word_padding_idx=word_padding_idx, w... |
def get_user_detail(user_id, html):
    """Parse a user profile page and attach follow/fan/status counts.

    Returns the populated user object, or None when the page contains no user.
    """
    user = person.get_detail(html, user_id)
    if user is None:
        return None
    user.uid = user_id
    user.follows_num = person.get_friends(html)
    user.fans_num = person.get_fans(html)
    user.wb_num = person.get_status(html)
    return user
def test_ClusterNodeSequence_getitem():
G = create_stellargraph()
nsg = ClusterNodeSequence(graph=G, clusters=[['a'], ['b'], ['c'], ['d']], node_ids=['a', 'b', 'd'])
assert (len(nsg) == 4)
for cluster in list(nsg):
print(cluster)
assert (len(cluster) == 2)
assert (len(cluster[0][... |
def proximal_policy_optimization_loss(curr_prediction, curr_onehot, old_prediction, old_onehotpred, rewards, advantage, clip_val, beta=None):
rewards_ = tf.squeeze(rewards, axis=1)
advantage_ = tf.squeeze(advantage, axis=1)
entropy = 0
r = 1
for (t, (p, onehot, old_p, old_onehot)) in enumerate(zip(c... |
class DropPath(nn.Module):
    """Stochastic-depth module: delegates to ``drop_path`` at forward time."""

    def __init__(self, drop_prob=None):
        super().__init__()
        # Probability of dropping the path; None is forwarded as-is to drop_path.
        self.drop_prob = drop_prob

    def forward(self, x):
        # self.training controls whether dropping is actually applied.
        return drop_path(x, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        # Shown in the module's repr, e.g. "DropPath(p=0.1)".
        return 'p={}'.format(self.drop_prob)
_model_architecture('transformer_lm', 'transformer_lm_gpt3_6_7')
def transformer_lm_gpt3_6_7(args):
args.decoder_layers = safe_getattr(args, 'decoder_layers', 32)
args.decoder_embed_dim = safe_getattr(args, 'decoder_embed_dim', 4096)
args.decoder_attention_heads = safe_getattr(args, 'decoder_attention_heads... |
class Counter():
    """Callback target that counts how many times it has been triggered."""

    def __init__(self):
        # Number of trigger() invocations observed so far.
        self.count = 0

    def trigger(self, detector, info):
        # detector/info are accepted for callback-signature compatibility
        # but are not inspected.
        self.count += 1
class AttentionModule(AbstractMILUnit):
def add_layers(self):
self.parent_module.mil_attn_V = nn.Linear((512 * 4), 128, bias=False)
self.parent_module.mil_attn_U = nn.Linear((512 * 4), 128, bias=False)
self.parent_module.mil_attn_w = nn.Linear(128, 1, bias=False)
self.parent_module.c... |
def _is_batch_set(obj: Any) -> bool:
if isinstance(obj, np.ndarray):
return ((obj.dtype == object) and all((isinstance(element, (dict, Batch)) for element in obj)))
elif isinstance(obj, (list, tuple)):
if ((len(obj) > 0) and all((isinstance(element, (dict, Batch)) for element in obj))):
... |
('mpi4py.MPI.COMM_WORLD.Bcast')
('dace.comm.Bcast')
def _bcast(pv: ProgramVisitor, sdfg: SDFG, state: SDFGState, buffer: str, root: Union[(str, sp.Expr, Number)]=0, grid: str=None, fcomm: str=None):
from dace.libraries.mpi.nodes.bcast import Bcast
libnode = Bcast('_Bcast_', grid, fcomm)
desc = sdfg.arrays[b... |
def _utt2spk_keydict(path):
utt2spk = {}
with open(path, 'r') as fi:
for line in fi:
(utt, spk) = line.strip().split()
utt2spk[utt] = spk
return utt2spk |
class FiniteDimensionalSemisimpleAlgebrasWithBasis(CategoryWithAxiom_over_base_ring):
_base_category_class_and_axiom = (SemisimpleAlgebras.FiniteDimensional, 'WithBasis')
class ParentMethods():
def radical_basis(self, **keywords):
return ()
_method
def central_orthogonal_idem... |
def get_prediction(img_path, threshold):
img = Image.open(img_path)
transform = T.Compose([T.ToTensor()])
img = transform(img)
pred = model([img])
pred_score = list(pred[0]['scores'].detach().numpy())
pred_t = [pred_score.index(x) for x in pred_score if (x > threshold)][(- 1)]
masks = (pred[... |
class SimpleTaggerTest(ModelTestCase):
def setUp(self):
super(SimpleTaggerTest, self).setUp()
self.set_up_model('tests/fixtures/simple_tagger/experiment.json', 'tests/fixtures/data/sequence_tagging.tsv')
def test_simple_tagger_can_train_save_and_load(self):
self.ensure_model_can_train_sa... |
def test_multi_objective_empty_losses():
    """Requesting a descent vector with no losses must raise ValueError."""
    no_losses = []
    with pytest.raises(ValueError):
        multi_cdv.get_descent_vector(no_losses, gradient)
def _make_imitator_inputs(trainer: transformers.Trainer, task_model: torch.nn.Module, inputs: Dict[(str, torch.Tensor)]) -> Dict[(str, torch.Tensor)]:
(logits, _, _) = misc_utils.predict(trainer=trainer, model=task_model, inputs=inputs)
imitator_inputs = deepcopy(inputs)
imitator_inputs['labels'] = torch.te... |
def qqp_logits_sentence_encoding(s1_rep, s2_rep, afn, n_state, is_train, clf_dropout, highway=False):
out_rep = tf.concat([tf.abs((s1_rep - s2_rep)), (s1_rep * s2_rep)], (- 1))
act = act_name2fn(afn)
h = act(conv1d(out_rep, 'c_fc', n_state, 1, train=is_train))
if highway:
trans = conv1d(h, 'c_tr... |
class MMProbe(t.nn.Module):
def __init__(self, direction, covariance=None, inv=None, atol=0.001):
super().__init__()
self.direction = t.nn.Parameter(direction, requires_grad=False)
if (inv is None):
self.inv = t.nn.Parameter(t.linalg.pinv(covariance, hermitian=True, atol=atol), r... |
def disable_autodiff_subgraph_inlining(enabled=True):
    """Generator-based context manager toggling autodiff subgraph inlining.

    While active, inlining is set to ``not enabled``; on exit it is always
    set back to True, regardless of its prior value.
    """
    torch._C._debug_set_autodiff_subgraph_inlining(not enabled)
    try:
        yield
    finally:
        # Unconditionally re-enable rather than restoring a saved state.
        torch._C._debug_set_autodiff_subgraph_inlining(True)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', only_best=False, logdir=''):
resfile = os.path.join(logdir, filename)
if is_best:
torch.save(state, resfile)
shutil.copyfile(resfile, os.path.join(logdir, 'model_temp_best.pth.tar'))
os.remove(resfile)
if only_best:
... |
def build_transforms_hist(cfg, is_train=True, PIXEL_MEAN=[0.485, 0.456, 0.406], PIXEL_STD=[0.229, 0.224, 0.225]):
    """Build the resize + to-tensor transform pipeline.

    ``cfg`` must provide ``height`` and ``width``. The remaining parameters
    are kept for interface compatibility but are currently unused:
    ``is_train`` does not alter the pipeline, and PIXEL_MEAN/PIXEL_STD are
    not applied.

    NOTE(review): the original built ``T.Normalize(PIXEL_MEAN, PIXEL_STD)``
    and then discarded it — the returned pipeline does NOT normalize. That
    dead computation is removed here without changing behavior; if callers
    expect normalized tensors, adding Normalize back is a behavior change
    that must be confirmed first.
    """
    return T.Compose([T.Resize([cfg.height, cfg.width]), T.ToTensor()])
def get_cmd(task, sub_task, model_tag, gpu, data_num, bs, lr, source_length, target_length, patience, epoch, warmup, model_dir, summary_dir, res_fn, max_steps=None, save_steps=None, log_steps=None):
if (max_steps is None):
cmd_str = ('bash exp_with_args.sh %s %s %s %d %d %d %d %d %d %d %d %d %s %s %s' % (ta... |
def test_hub_modelcardhelper(request, save_path):
model = prep_model()
hmch = HubModelCardHelper(license_info='cc-by-4.0', model_cls_name='SCVI', model_init_params=model.init_params_, model_setup_anndata_args=model.adata_manager._get_setup_method_args()['setup_args'], model_summary_stats=model.summary_stats, mo... |
def test_angular_neighbors():
    """Each vector's 2 nearest angular neighbors match the known answer."""
    vectors = [[0, 0, 1],
               [0, 0, 3],
               [1, 2, 3],
               [(- 1), (- 2), (- 3)]]
    expected = np.array([[1, 2], [0, 2], [0, 1], [0, 1]])
    assert_equal(angular_neighbors(vectors, 2), expected)
def prelu_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, base_axis=1):
dy = grad_inputs[0]
x0 = inputs[0]
w0 = inputs[1]
base_axis += (x0.ndim * (base_axis < 0))
m0 = F.greater_scalar(x0, 0)
m1 = (1 - m0)
m0 = no_grad(m0)
m1 = no_grad(m1)
if (w0.shape == ()):
... |
def _real_entropy_individual(traj):
    """Compute the true entropy of one trajectory's lat/lon visit sequence."""
    coords = traj[[constants.LATITUDE, constants.LONGITUDE]].values
    # Rows are converted to hashable tuples before the entropy computation.
    time_series = tuple(tuple(row) for row in coords)
    return _true_entropy(time_series)
def evaluate(model: Model, instances: Iterable[Instance], data_iterator: DataIterator, cuda_device: int, label_fname: str) -> Dict[(str, Any)]:
_warned_tqdm_ignores_underscores = False
check_for_gpu(cuda_device)
with torch.no_grad():
model.eval()
label_file = open(label_fname, 'w')
l... |
class Conv2d(_ConvNd):
_FLOAT_MODULE = nn.Conv2d
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pa... |
class HMGNN(nn.Module):
def __init__(self, num_convs, dg_node_type_universe, lg_node_type_universe, dg_num_interaction_residuals, lg_num_interaction_residuals, dg_num_residuals, lg_num_residuals, rbf_dim, cut_r, dg_mean, lg_mean, dg_std, lg_std, hidden_dim, activation, feat_drop):
super(HMGNN, self).__init_... |
class PatchInferencer():
def __init__(self, model_weight_file, output_patch_mask):
self.output_patch_mask = output_patch_mask
sys.path.append(model_weight_file)
from pznet.pznet import PZNet
self.net = PZNet(model_weight_file)
def compute_device(self):
return platform.pro... |
def romanian_preprocessing(text):
text = text.replace('S', 'S').replace('s', 's')
text = text.replace('T', 'T').replace('t', 't')
text = text.replace('S', 'S').replace('s', 's')
text = text.replace('T', 'T').replace('t', 't')
text = text.replace('A', 'A').replace('a', 'a')
text = text.replace('A... |
class TestRegression(object):
def test_masked_array_create(self):
x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6], mask=[0, 0, 0, 1, 1, 1, 0, 0])
assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]])
def test_masked_array(self):
np.ma.array(1, mask=[1])
def test_mem_masked_where(self)... |
def test_from_pandas_contextual_severity():
    """Severity values are carried through into the returned tuples."""
    anomalies = pd.DataFrame({
        'start': [2, 8],
        'end': [5, 9],
        'severity': [0.1, 0.2],
    })
    returned = from_pandas_contextual(anomalies)
    assert_list_tuples(returned, [(2, 5, 0.1), (8, 9, 0.2)])
def test_big():
    """A snapshot taken mid-fill stays fixed while the builder keeps growing."""
    builder = ak.highlevel.ArrayBuilder(initial=90)
    snapshot_mid = None
    for i in range(2000):
        # Capture the builder state just before the 201st append.
        if i == 200:
            snapshot_mid = builder.snapshot()
        builder.boolean(i % 2 == 0)
    assert to_list(builder) == [True, False] * 1000
    assert to_list(snapshot_mid) == [True, False] * 100
.parametrize('basis, quad', ((list(product(ctrialBasis, cquads)) + list(product(ltrialBasis, lquads))) + list(product(latrialBasis, lagquads))))
def test_div2(basis, quad):
B = basis(10, quad=quad)
u = shenfun.TrialFunction(B)
v = shenfun.TestFunction(B)
m = inner(u, v)
z = Function(B, val=1)
c ... |
def train_one_epoch(model: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, log_writer=None, args=None):
model.train(True)
metric_logger = misc.MetricLogger(delimiter=' ')
metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1,... |
class PreActResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(PreActResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride... |
class SensitivityExplanation(ExplanationBase):
def __init__(self):
super().__init__()
self.explanations = defaultdict(dict)
def add(self, feature_name, mu, mu_star, sigma, mu_star_conf):
self.explanations[feature_name] = {'mu': mu, 'mu_star': mu_star, 'sigma': sigma, 'mu_star_conf': mu_s... |
def SwitchNot(name, *conditions):
    """Build a scoped execution step that runs each step when its condition is false."""
    conditions = _MakeList(conditions)
    steps = [
        _RunOnceIfNot(name + '/SwitchNot', cond, step)
        for (cond, step) in conditions
    ]
    return core.scoped_execution_step(_get_next_step_name('SwitchNot', name), steps)
def get_weight_norm(model):
    """Return the L2 norm of the stacked per-parameter norms of all 'weight' params."""
    per_param_norms = [
        torch.norm(param.detach())
        for name, param in model.named_parameters()
        if 'weight' in name
    ]
    return torch.norm(torch.stack(per_param_norms))
class _IntegerLessThan(Constraint):
    """Constraint accepting integer-valued inputs no greater than a bound."""

    def __init__(self, upper_bound):
        # Inclusive upper bound on accepted values.
        self.upper_bound = upper_bound

    def check(self, value):
        # `value % 1 == 0` tests integrality; bitwise & (not `and`) keeps this
        # working elementwise on tensor-like inputs.
        is_integer = (value % 1) == 0
        return is_integer & (value <= self.upper_bound)
def push_to_influx(metric_name: str, value: int, labels: dict) -> bool:
    """Push one metric point by delegating to the batch push helper."""
    single_point = [(metric_name, value, labels)]
    return batch_push_to_influx(single_point)
class AdjacentTempDirectory(TempDirectory):
LEADING_CHARS = '-~.=%'
def __init__(self, original, delete=None):
self.original = original.rstrip('/\\')
super(AdjacentTempDirectory, self).__init__(delete=delete)
def _generate_names(cls, name):
for i in range(1, len(name)):
f... |
class CapFiltCaptionDataset(BaseDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
self.img_ids = {}
n = 0
for ann in self.annotation:
ann['image_id'] = ''.jo... |
class TestDatasetFromList(unittest.TestCase):
((sys.version_info.minor <= 6), 'Not supported in Python 3.6')
def test_using_lazy_path(self):
dataset = []
for i in range(10):
dataset.append({'file_name': LazyPath(partial(_a_slow_func, i))})
dataset = DatasetFromList(dataset)
... |
def get_git_commit_hash():
    """Return the current git HEAD commit hash as a string.

    Matches the original Popen-based behavior: if git prints nothing
    (e.g. not inside a repository) an empty string is returned; a missing
    git binary raises FileNotFoundError.
    """
    import subprocess
    # subprocess.run waits for and reaps the child, fixing the original's
    # dangling Popen handle; stdout is captured, decoded and trimmed.
    result = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE)
    return result.stdout.strip().decode('utf-8')
class BertTokenizer(object):
def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True, never_split=('[UNK]', '[SEP]', '[PAD]', '[CLS]', '[MASK]')):
if (not os.path.isfile(vocab_file)):
raise ValueError("Can't find a vocabulary file at path '{}'. To load the vocabul... |
class DoxyCompMem(Base):
kind = None
def __init__(self, *args, **kwargs):
super(DoxyCompMem, self).__init__(*args, **kwargs)
def can_parse(cls, obj):
return (obj.kind == cls.kind)
def set_descriptions(self, parse_data):
bd = description(getattr(parse_data, 'briefdescription', Non... |
def gen_classifier_loader(name, d):
    """Return a zero-argument loader building the smoothed classifier described by ``d``."""
    def classifier_loader():
        # Instantiate the base architecture, restore its weights, then wrap
        # it with randomized smoothing using the configured noise parameters.
        base_model = torch_models.__dict__[d['arch']]()
        load_model_state_dict(base_model, name)
        return Smooth(base_model, d['noise_sigma'], d['n'], d['alpha'], d['mean'], d['std'])
    return classifier_loader
def test_validate_series(df_broken_email: pd.DataFrame) -> None:
    """validate_email flags exactly the malformed entries of the fixture."""
    validated = validate_email(df_broken_email['messy_email'])
    expected = pd.Series(
        [True, True, False, True, False, False, False, False],
        name='messy_lat_long',
    )
    assert expected.equals(validated)
_module
class NonLinearNeckV0(nn.Module):
def __init__(self, in_channels, hid_channels, out_channels, sync_bn=False, with_avg_pool=True):
super(NonLinearNeckV0, self).__init__()
self.with_avg_pool = with_avg_pool
if with_avg_pool:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
... |
def tf_test_error_rate(logits, x, X_test, y_test):
    """Evaluate softmax predictions for X_test and return the error rate vs y_test."""
    assert len(X_test) == len(y_test)
    probabilities = K.softmax(logits)
    predictions = batch_eval([x], [probabilities], [X_test])[0]
    return error_rate(predictions, y_test)
class Credentials(ABC, LoggingBase):
def __init__(self):
super().__init__()
'\n Create credentials instance from user config and cached values.\n '
def deserialize(config: dict, cache: Cache, handlers: LoggingHandlers) -> 'Credentials':
pass
'\n Serialize to JSON for sto... |
def _process_group_construct_rpc_backend_options_handler(rpc_timeout, init_method, num_send_recv_threads=rpc_constants.DEFAULT_NUM_SEND_RECV_THREADS, **kwargs):
from . import ProcessGroupRpcBackendOptions
return ProcessGroupRpcBackendOptions(rpc_timeout=rpc_timeout, init_method=init_method, num_send_recv_thread... |
def test_BBPSSWMessage():
    """PURIFICATION_RES messages expose their fields; invalid types are rejected."""
    msg = BBPSSWMessage(BBPSSWMsgType.PURIFICATION_RES, 'another', meas_res=0)
    assert msg.msg_type == BBPSSWMsgType.PURIFICATION_RES
    assert msg.receiver == 'another'
    assert msg.meas_res == 0
    # Constructing with an unrecognized message type must fail.
    with pytest.raises(Exception):
        BBPSSWMessage('unknown type')
class ValidationMonitor(object):
def __init__(self, writer):
self._writer = writer
def add(self, i, val_results):
all_test_metric = val_results[0]
val_loss = val_results[1]
self._writer.add_scalar('Metrics/1_ER-LD', all_test_metric[0], i)
self._writer.add_scalar('Metrics/... |
class ONMTDatasetBase(torchtext.data.Dataset):
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__.update(d)
def __reduce_ex__(self, proto):
return super(ONMTDatasetBase, self).__reduce_ex__()
def load_fields(self, vocab_dict):
from onmt.... |
def knn(m_xx, m_xy, m_yy, k, sqrt=False):
n0 = m_xx.size(0)
n1 = m_yy.size(0)
label = torch.cat((torch.ones(n0), torch.zeros(n1))).to(m_xx)
mat = torch.cat((torch.cat((m_xx, m_xy), 1), torch.cat((m_xy.transpose(0, 1), m_yy), 1)), 0)
if sqrt:
mat = mat.abs().sqrt()
(val, idx) = (mat + tor... |
(scope='module')
def dataframe_only_item_none_pandas():
data_only_item_none = [(1, [2, 0, 0, 0, 0, 0], [19842]), (1, [2, 4, 0, 0, 0, 0], [19842, 19844]), (1, [2, 4, 3, 0, 0, 0], [19842, 19844, 19843]), (1, [2, 4, 3, 5, 0, 0], [19842, 19844, 19843, 19845]), (1, [2, 4, 3, 5, 6, 0], [19842, 19844, 19843, 19845, 19846]... |
class MultiPrototypes(nn.Module):
def __init__(self, output_dim, nmb_prototypes):
super(MultiPrototypes, self).__init__()
self.nmb_heads = len(nmb_prototypes)
for (i, k) in enumerate(nmb_prototypes):
self.add_module(('prototypes' + str(i)), nn.Linear(output_dim, k, bias=False))
... |
def load_questions(filename='questions.csv'):
    """Load the questions CSV, dropping columns that are entirely empty."""
    frame = pd.read_csv(filename)
    # Reassignment instead of inplace=True; the returned frame is identical.
    frame = frame.dropna(axis=1, how='all')
    return frame
def validate_pathname_binary_tuple(data):
if (not isinstance(data, tuple)):
raise TypeError('pathname binary data should be tuple type, but got {}'.format(type(data)))
if (len(data) != 2):
raise TypeError('pathname binary tuple length should be 2, but got {}'.format(str(len(data))))
if (not ... |
def _get_string_replacement(tok: Token) -> List[Token]:
result = []
if ((tok.ttype == tokens.Token.Literal.String.Symbol) or (tok.ttype == tokens.Token.Literal.String.Single)):
v = tok.value
result.append((v[0] + v[(- 1)]))
(start, end) = (1, (len(v) - 1))
for span_start in range... |
class InstanceNorm1d(torch.nn.InstanceNorm1d):
def __init__(self, num_features, weight, bias, scale, zero_point, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False):
super(InstanceNorm1d, self).__init__(num_features, eps, momentum, affine, track_running_stats)
self.weight = weight
... |
def compare(fitness_1: float, fitness_2: float) -> int:
    """Three-way comparison: -1, 1, or 0 as fitness_1 is <, >, or neither.

    Note the comparison order is preserved from the original, so unordered
    inputs (e.g. NaN) fall through to 0.
    """
    if fitness_1 < fitness_2:
        return -1
    return 1 if fitness_1 > fitness_2 else 0
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--wordvec_pretrain_file', type=str, default=None, help='Exact name of the pretrain file to read')
parser.add_argument('--charlm', default='default', type=str, help='Which charlm to run on. Will use the default charlm for this languag... |
def makedir(dir_path):
    """Create ``dir_path`` (with parents) if missing; return True on success.

    Best-effort: filesystem failures are reported on stdout and yield False
    instead of raising.
    """
    is_success = False
    try:
        if not g_pathmgr.exists(dir_path):
            g_pathmgr.mkdirs(dir_path)
        is_success = True
    except Exception:
        # Narrowed from BaseException: the original also swallowed
        # KeyboardInterrupt/SystemExit, which should propagate.
        print(f'Error creating directory: {dir_path}')
    return is_success
class AccumulateMeter(object):
def __init__(self, greater_is_better=True, print_precision=4):
self.greater_is_better = greater_is_better
self.print_precision = print_precision
self.reset()
def reset(self):
self.avg = 0.0
self.val = 0.0
self.count = 0
def updat... |
class GranularizePipe(Pipe):
def __init__(self, task=None):
super().__init__()
self.task = task
def _granularize(self, data_bundle, tag_map):
for name in list(data_bundle.datasets.keys()):
dataset = data_bundle.get_dataset(name)
dataset.apply_field((lambda target:... |
def add_variables(field, variables):
if (not variables):
return field
if is_FractionField(field):
R = field.ring()
if (is_PolynomialRing(R) or is_MPolynomialRing(R)):
new_variables = list(R.variable_names())
for v in variables:
if (v not in new_var... |
def require_cython(test_case):
    """Decorate ``test_case`` so it is skipped unless Cython is available."""
    skip_marker = unittest.skipUnless(is_cython_available(), 'test requires cython')
    return skip_marker(test_case)
_grad()
def check_forward_equal_with_pytorch_float():
value = (torch.rand(N, S, M, D).cuda() * 0.01)
sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
attention_weights = (torch.rand(N, Lq, M, L, P).cuda() + 1e-05)
attention_weights /= attention_weights.sum((- 1), keepdim=True).sum((- 2), keepdi... |
def simulate_policy():
file = './her-sac-fetch-experiment/her-sac-fetch-experiment_2020_07_07_11_11_14_0000--s-0/params.pkl'
data = torch.load(file)
policy = data['evaluation/policy']
policy.reset()
def policy_func(obs):
(a, agent_info) = policy.get_action(obs)
return a
task = ge... |
class Metadata():
platform: PlatformMetadata = field(default_factory=PlatformMetadata)
interpreter: InterpreterMetadata = field(default_factory=InterpreterMetadata)
cli: CliMetadata = field(default_factory=CliMetadata)
docker_image: (str | None) = field(default_factory=(lambda : os.getenv(DOCKER_IMAGE_E... |
def p_matrix(p):
    # NOTE(review): this is a PLY parser action (p_*-named, takes production
    # object `p`). PLY reads the grammar rule from the function DOCSTRING,
    # which appears to have been stripped in this copy — do not add one back
    # without restoring the exact rule text; comments only here on purpose.
    (startl, endl) = p.linespan(0)   # source line span of the whole production
    (startc, endc) = p.lexspan(0)    # lexer position span of the production
    # Attach source-location debug info to the AST node being built.
    di0 = dace.dtypes.DebugInfo(startl, startc, endl, endc)
    if (len(p) == 3):
        # Short production (2 symbols): presumably the empty-matrix form.
        p[0] = AST_Matrix(di0, [])
    else:
        # Longer production: the matrix contents come from symbol p[2].
        p[0] = AST_Matrix(di0, p[2])
_module()
class Runner(EpochBasedRunner):
    """Deprecated alias for EpochBasedRunner; warns once per construction."""

    def __init__(self, *args, **kwargs):
        # Emit the deprecation notice, then defer entirely to the parent.
        warnings.warn('Runner was deprecated, please use EpochBasedRunner instead')
        super().__init__(*args, **kwargs)
def convert_to_float(value):
if isinstance(value, float):
return value
if isinstance(value, int):
return float(value)
if (not isinstance(value, str)):
raise ValueError("Argument value is not a string. Can't parse it as float")
sanitized = value
try:
if (('.' in saniti... |
class AudioNTT2020(AudioNTT2020Task6):
def __init__(self, n_mels=64, d=512):
super().__init__(n_mels=n_mels, d=d)
def forward(self, x):
x = super().forward(x)
(x1, _) = torch.max(x, dim=1)
x2 = torch.mean(x, dim=1)
x = (x1 + x2)
assert ((x.shape[1] == self.d) and ... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.